{
p2m_type_t p2mt;
+ rc = xsm_mmu_normal_update(d, pg_owner, req.val);
+ if ( rc )
+ break;
rc = -EINVAL;
req.ptr -= cmd;
(unsigned long)(req.ptr & ~PAGE_MASK));
page = mfn_to_page(mfn);
- rc = xsm_mmu_normal_update(d, req.val, page);
- if ( rc ) {
- unmap_domain_page_with_cache(va, &mapcache);
- put_page(page);
- put_gfn(pt_owner, gmfn);
- break;
- }
-
if ( page_lock(page) )
{
switch ( page->u.inuse.type_info & PGT_type_mask )
mfn = req.ptr >> PAGE_SHIFT;
gpfn = req.val;
+ rc = xsm_mmu_machphys_update(d, mfn);
+ if ( rc )
+ break;
+
if ( unlikely(!get_page_from_pagenr(mfn, pg_owner)) )
{
MEM_LOG("Could not get page for mach->phys update");
break;
}
- rc = xsm_mmu_machphys_update(d, mfn_to_page(mfn));
- if ( rc )
- break;
-
set_gpfn_from_mfn(mfn, gpfn);
paging_mark_dirty(pg_owner, mfn);
perfc_incr(calls_to_update_va);
+ rc = xsm_update_va_mapping(d, pg_owner, val);
+ if ( rc )
+ return rc;
+
rc = -EINVAL;
pl1e = guest_map_l1e(v, va, &gl1mfn);
if ( unlikely(!pl1e || !get_page_from_pagenr(gl1mfn, d)) )
goto out;
}
- rc = xsm_update_va_mapping(d, val, gl1pg);
- if ( rc ) {
- page_unlock(gl1pg);
- put_page(gl1pg);
- goto out;
- }
-
rc = mod_l1_entry(pl1e, val, gl1mfn, 0, v, pg_owner);
page_unlock(gl1pg);
int (*getidletime) (void);
int (*machine_memory_map) (void);
int (*domain_memory_map) (struct domain *d);
- int (*mmu_normal_update) (struct domain *d,
- intpte_t fpte, struct page_info *page);
- int (*mmu_machphys_update) (struct domain *d, struct page_info *page);
- int (*update_va_mapping) (struct domain *d,
- l1_pgentry_t pte,
- struct page_info *page);
+ int (*mmu_normal_update) (struct domain *d, struct domain *f,
+ intpte_t fpte);
+ int (*mmu_machphys_update) (struct domain *d, unsigned long mfn);
+ int (*update_va_mapping) (struct domain *d, struct domain *f,
+ l1_pgentry_t pte);
int (*add_to_physmap) (struct domain *d1, struct domain *d2);
int (*sendtrigger) (struct domain *d);
int (*bind_pt_irq) (struct domain *d, struct xen_domctl_bind_pt_irq *bind);
return xsm_call(domain_memory_map(d));
}
+/*
+ * XSM hook wrapper: may domain d install PTE 'fpte' referencing a frame
+ * owned by (foreign) domain f?  The hook now takes (f, fpte) instead of
+ * a struct page_info so the callee resolves the frame itself.
+ */
-static inline int xsm_mmu_normal_update (struct domain *d,
-                                         intpte_t fpte, struct page_info *page)
+static inline int xsm_mmu_normal_update (struct domain *d, struct domain *f,
+                                         intpte_t fpte)
{
- return xsm_call(mmu_normal_update(d, fpte, page));
+ return xsm_call(mmu_normal_update(d, f, fpte));
}
+/*
+ * XSM hook wrapper: may domain d update the machine->phys table entry
+ * for 'mfn'?  Takes the raw MFN rather than a struct page_info.
+ */
-static inline int xsm_mmu_machphys_update (struct domain *d, struct page_info *page)
+static inline int xsm_mmu_machphys_update (struct domain *d, unsigned long mfn)
{
- return xsm_call(mmu_machphys_update(d, page));
+ return xsm_call(mmu_machphys_update(d, mfn));
}
+/*
+ * XSM hook wrapper: may domain d install PTE 'pte' (frame owned by
+ * domain f) via update_va_mapping?  Mirrors xsm_mmu_normal_update.
+ */
-static inline int xsm_update_va_mapping(struct domain *d,
-                                        l1_pgentry_t pte,
-                                        struct page_info *page)
+static inline int xsm_update_va_mapping(struct domain *d, struct domain *f,
+                                        l1_pgentry_t pte)
{
- return xsm_call(update_va_mapping(d, pte, page));
+ return xsm_call(update_va_mapping(d, f, pte));
}
static inline int xsm_add_to_physmap(struct domain *d1, struct domain *d2)
return 0;
}
+/* Permissive default for the mmu_normal_update hook: always allow. */
-static int dummy_mmu_normal_update (struct domain *d,
-                                    intpte_t fpte, struct page_info *page)
+static int dummy_mmu_normal_update (struct domain *d, struct domain *f,
+                                    intpte_t fpte)
{
return 0;
}
+/* Permissive default for the mmu_machphys_update hook: always allow. */
-static int dummy_mmu_machphys_update (struct domain *d, struct page_info *page)
+static int dummy_mmu_machphys_update (struct domain *d, unsigned long mfn)
{
return 0;
}
+/* Permissive default for the update_va_mapping hook: always allow. */
-static int dummy_update_va_mapping (struct domain *d,
-                                    l1_pgentry_t pte,
-                                    struct page_info *page)
+static int dummy_update_va_mapping (struct domain *d, struct domain *f,
+                                    l1_pgentry_t pte)
{
return 0;
}
return rc;
}
+/*
+ * Look up the FLASK security ID for an MFN.  Frames tracked by Xen
+ * carry a SID on their struct page_info; anything else is treated as
+ * I/O memory and labelled from the iomem policy.
+ * Returns 0 on success, with *sid filled in; nonzero on lookup failure.
+ */
+static int get_mfn_sid(unsigned long mfn, u32 *sid)
+{
+ int rc = 0;
+ struct page_info *page;
+
+ if ( mfn_valid(mfn) )
+ {
+ /* mfn is valid only for pages that Xen is tracking. */
+ page = mfn_to_page(mfn);
+ rc = get_page_sid(page, sid);
+ }
+ else
+ {
+ /* Possibly an untracked I/O page? */
+ rc = security_iomem_sid(mfn, sid);
+ }
+
+ return rc;
+}
+
static int flask_memory_adjust_reservation(struct domain *d1, struct domain *d2)
{
return domain_has_perm(d1, d2, SECCLASS_MMU, MMU__ADJUST);
return domain_has_perm(current->domain, d, SECCLASS_MMU, MMU__MEMORYMAP);
}
-static int flask_mmu_normal_update(struct domain *d,
- intpte_t fpte, struct page_info *page)
+static int flask_mmu_normal_update(struct domain *d, struct domain *f,
+ intpte_t fpte)
{
int rc = 0;
u32 map_perms = MMU__MAP_READ;
+ unsigned long fmfn;
struct domain_security_struct *dsec;
u32 fsid;
if ( l1e_get_flags(l1e_from_intpte(fpte)) & _PAGE_RW )
map_perms |= MMU__MAP_WRITE;
- rc = get_page_sid(page, &fsid);
+ fmfn = get_gfn_untyped(f, l1e_get_pfn(l1e_from_intpte(fpte)));
+
+ rc = get_mfn_sid(fmfn, &fsid);
if ( rc )
return rc;
return avc_has_perm(dsec->sid, fsid, SECCLASS_MMU, map_perms, NULL);
}
+/*
+ * FLASK check for a machine->phys table update: domain d needs
+ * MMU__UPDATEMP permission on the security label of 'mfn'.
+ */
-static int flask_mmu_machphys_update(struct domain *d, struct page_info *page)
+static int flask_mmu_machphys_update(struct domain *d, unsigned long mfn)
{
int rc = 0;
u32 psid;
struct domain_security_struct *dsec;
dsec = d->ssid;
- rc = get_page_sid(page, &psid);
+ rc = get_mfn_sid(mfn, &psid);
if ( rc )
return rc;
return avc_has_perm(dsec->sid, psid, SECCLASS_MMU, MMU__UPDATEMP, NULL);
}
-static int flask_update_va_mapping(struct domain *d,
- l1_pgentry_t pte, struct page_info *page)
+static int flask_update_va_mapping(struct domain *d, struct domain *f,
+ l1_pgentry_t pte)
{
int rc = 0;
u32 psid;
u32 map_perms = MMU__MAP_READ;
+ unsigned long mfn;
struct domain_security_struct *dsec;
dsec = d->ssid;
- rc = get_page_sid(page, &psid);
+ mfn = get_gfn_untyped(f, l1e_get_pfn(pte));
+ rc = get_mfn_sid(mfn, &psid);
if ( rc )
return rc;